In [1]:
import numpy as np

In [2]:
def divergence(F):
    """Element-wise sum of the gradients of `F` along every axis.

    Parameters
    ----------
    F : ndarray
        Array of any dimensionality (>= 2 points per axis, as required
        by ``np.gradient``).

    Returns
    -------
    ndarray
        Same shape as `F` for inputs with ``ndim >= 2``.  NOTE: this is
        not the vector-calculus divergence of a vector field — for an
        (m, n, 2) input it returns an (m, n, 2) array, not an (m, n)
        scalar field.  See `my_divergence` later in this notebook for a
        per-component formulation.
    """
    # Local import: `reduce` is otherwise only imported in a later cell
    # (In[7]), so calling this function before that cell would raise
    # NameError on a fresh kernel.
    from functools import reduce
    return reduce(np.add, np.gradient(F))

In [3]:
F = np.random.rand(100,100, 2)

In [4]:
F


Out[4]:
array([[[0.8309781 , 0.7583944 ],
        [0.88322921, 0.274582  ],
        [0.14286456, 0.8010026 ],
        ...,
        [0.69006129, 0.86605906],
        [0.58184053, 0.11968498],
        [0.8344041 , 0.65732281]],

       [[0.32583804, 0.62316064],
        [0.24107316, 0.12332573],
        [0.73095455, 0.00981541],
        ...,
        [0.79910124, 0.32013929],
        [0.66649508, 0.66257074],
        [0.34204798, 0.35378482]],

       [[0.16205502, 0.92953131],
        [0.38594994, 0.9358332 ],
        [0.1068871 , 0.64052787],
        ...,
        [0.2328214 , 0.9179261 ],
        [0.99095834, 0.9711499 ],
        [0.87065886, 0.21513783]],

       ...,

       [[0.72312426, 0.88729952],
        [0.21048511, 0.06933395],
        [0.8645752 , 0.25733672],
        ...,
        [0.45016759, 0.60610769],
        [0.41126202, 0.25861989],
        [0.54822108, 0.8586362 ]],

       [[0.45138134, 0.17949857],
        [0.5146927 , 0.35091362],
        [0.80898221, 0.75460091],
        ...,
        [0.03240326, 0.05349207],
        [0.16500886, 0.70159113],
        [0.92716411, 0.67500229]],

       [[0.48031569, 0.98790519],
        [0.76599555, 0.58200885],
        [0.20424332, 0.20044698],
        ...,
        [0.46411493, 0.06292719],
        [0.93264516, 0.90471222],
        [0.24325688, 0.84625095]]])

In [5]:
F.ndim


Out[5]:
3

In [6]:
F.shape


Out[6]:
(100, 100, 2)

In [7]:
from functools import reduce

In [8]:
def divergence_a(field):
    """Element-wise sum of the gradients of `field` along all axes.

    Returns an array with the same shape as `field` (for ndim >= 2);
    see the note on `divergence` — this is not a scalar divergence.
    """
    per_axis_grads = np.gradient(field)
    # Stack the per-axis gradients on a new leading axis, then collapse it.
    return np.stack(per_axis_grads).sum(axis=0)

In [9]:
timeit divergence_a(F)


3 ms ± 220 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

In [10]:
d_a = divergence(F)

In [11]:
d_a.shape


Out[11]:
(100, 100, 2)

In [12]:
def divergence_b(F):
    """Element-wise sum of the gradients of `F` along all axes.

    Duplicate of `divergence` above, kept only for the %timeit
    comparison in the next cell.
    """
    # np.add.reduce folds the list of per-axis gradients left-to-right,
    # exactly like functools.reduce(np.add, ...), without needing the
    # functools import.
    return np.add.reduce(np.gradient(F))

In [13]:
timeit divergence_b(F)


2.49 ms ± 156 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

In [14]:
d_b = divergence_b(F)

In [15]:
d_b.shape


Out[15]:
(100, 100, 2)

In [16]:
np.allclose(d_a, d_b)


Out[16]:
True

In [17]:
F = np.random.rand(100,100, 2)

In [18]:
def my_divergence(F):
    """Divergence-like quantity for a two-component field stored as
    ``F[:, :, 0]`` (x-component) and ``F[:, :, 1]`` (y-component):

        d(Fx)/d(axis 1) - d(Fy)/d(axis 0)

    NOTE(review): the axis-0 derivative is subtracted, not added —
    presumably an image/row-axis sign convention; confirm against the
    intended math before reusing.
    """
    fx = F[:, :, 0]
    fy = F[:, :, 1]
    dfx_dcol = np.gradient(fx)[1]  # derivative along axis 1 (columns)
    dfy_drow = np.gradient(fy)[0]  # derivative along axis 0 (rows)
    return dfx_dcol - dfy_drow

In [19]:
timeit my_divergence(F)


2.06 ms ± 508 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

In [20]:
d_m = my_divergence(F)

In [21]:
F1, F2 = F[:, :, 0], F[:, :, 1]

In [22]:
def my_divergence_split(F1, F2):
    """Divergence-like combination of two scalar component fields:

        d(F1)/d(axis 1) - d(F2)/d(axis 0)

    Passing ``axis=`` to ``np.gradient`` computes only the derivative
    that is actually used, which is why this variant times fastest.
    """
    return np.gradient(F1, axis=1) - np.gradient(F2, axis=0)

In [23]:
timeit my_divergence_split(F1, F2)


1.37 ms ± 511 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)

In [24]:
d_s = my_divergence_split(F1, F2)

In [25]:
np.allclose(d_m, d_s)


Out[25]:
True